import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob
import os
import pickle
%matplotlib inline
# --- Global configuration flags and folder paths ---
save_images = True        # write intermediate/output images to disk
draw_inner_images = True  # render the debug figures for intermediate steps
repo_folder = './'                   # repository root (videos live here)
cam_cal_folder = './camera_cal/'     # chessboard calibration images
test_folder = './test_images/'       # road test images
out_img_folder = './output_images/'  # where generated images are written
chal_folder = './chal_images/'       # challenge-video frames
def BGR2RGB(img):
    """Swap the first and third channels of a 3-channel image (BGR<->RGB).

    The swap is symmetric, so the same call also converts RGB->BGR
    (it is used both ways in this file, e.g. before cv2.imwrite).

    Parameters
    ----------
    img : np.ndarray
        H x W x 3 image.

    Returns
    -------
    np.ndarray
        Contiguous copy of `img` with channels 0 and 2 swapped.
    """
    # Reversing the channel axis swaps B<->R in one step; .copy() keeps
    # the result contiguous like cv2.merge did. This drops the dead
    # `b, g, r = None, None, None` pre-assignment and the
    # cv2.split/cv2.merge round-trip of the original.
    return img[:, :, ::-1].copy()
def calibrate_save(cam_cal_folder):
    """Calibrate the camera from chessboard images and cache the result.

    Scans `cam_cal_folder` for `calibration*.jpg`, detects 9x6 inner
    chessboard corners in each, runs cv2.calibrateCamera, and pickles
    the camera matrix and distortion coefficients next to the images.

    Parameters
    ----------
    cam_cal_folder : str
        Folder containing the chessboard calibration images.

    Returns
    -------
    (mtx, dist) : camera matrix and distortion coefficients.

    Raises
    ------
    ValueError
        If no image yields detectable chessboard corners (the original
        code would fail with an undefined `gray` or an opaque OpenCV
        error in that case).
    """
    cal_nx = 9  # inner corners per row
    cal_ny = 6  # inner corners per column
    cal_sh = (cal_nx, cal_ny)
    images = glob.glob(cam_cal_folder + 'calibration*.jpg')
    # Arrays to store object points and image points from all the images
    objpoints = []  # 3D points in real world space
    imgpoints = []  # 2D points in image plane
    # Prepare object points, like (0,0,0),(1,0,0),(2,0,0),...,(8,5,0)
    objp = np.zeros((cal_ny * cal_nx, 3), np.float32)
    objp[:, :2] = np.mgrid[0:cal_nx, 0:cal_ny].T.reshape(-1, 2)  # x,y coordinates
    img_shape = None
    for imgpath in images:
        img = cv2.imread(imgpath)
        # cv2.imread returns BGR, so convert with BGR2GRAY; the original
        # used RGB2GRAY, which applies the luma weights to the wrong
        # channels.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_shape = gray.shape[::-1]
        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, cal_sh, None)
        # If corners are found, add object points, image points
        if ret:
            imgpoints.append(corners)
            objpoints.append(objp)
    # Guard: calibrateCamera needs at least one detected board.
    if img_shape is None or not objpoints:
        raise ValueError('No chessboard corners found in ' + cam_cal_folder)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, img_shape, None, None)
    # Save the camera calibration result for later use (closing the
    # file deterministically via the context manager).
    dist_pickle = {'mtx': mtx, 'dist': dist}
    with open(cam_cal_folder + 'calibration_pickle.p', 'wb') as f:
        pickle.dump(dist_pickle, f)
    return mtx, dist
# Read in the saved camera matrix and distortion coefficients
def calibrate_load():
    """Load the cached camera calibration written by calibrate_save.

    Reads `calibration_pickle.p` from the module-level `cam_cal_folder`.

    Returns
    -------
    (mtx, dist) on success, or (None, None) when the cache file is
    missing, unreadable, or does not contain the expected keys —
    signalling the caller to recalibrate.
    """
    try:
        # `with` closes the file even on failure (the original leaked
        # the handle), and the narrowed except no longer swallows
        # unrelated bugs the way the original bare `except:` did.
        with open(cam_cal_folder + 'calibration_pickle.p', 'rb') as f:
            dist_pickle = pickle.load(f)
        return dist_pickle['mtx'], dist_pickle['dist']
    except (OSError, KeyError, EOFError, pickle.UnpicklingError):
        return None, None
# Load a cached calibration if one exists; otherwise calibrate from the
# chessboard images and cache the result for next time.
mtx, dist = calibrate_load()
if mtx is None or dist is None:
    mtx, dist = calibrate_save(cam_cal_folder)
if draw_inner_images:
    # Show the effect of distortion correction on a calibration image.
    cal_img = mpimg.imread(cam_cal_folder + 'calibration5.jpg')
    cal_undst = cv2.undistort(cal_img, mtx, dist, None, mtx)
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
    f.tight_layout()
    ax1.imshow(cal_img)
    ax1.set_title('Original', fontsize=30)
    ax2.imshow(cal_undst)
    ax2.set_title('Undistorted', fontsize=30)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
    if save_images:
        plt.savefig(out_img_folder + 'undistort_output.png')
# Undistort a road test image (mpimg.imread returns RGB channel order).
image_t5 = mpimg.imread(test_folder + 'test5.jpg')
undst_t5 = cv2.undistort(image_t5, mtx, dist, None, mtx)
if draw_inner_images:
    plt.figure(figsize=(18,12))
    plt.title('Original Image', fontsize=30)
    plt.imshow(image_t5)
    plt.figure(figsize=(18,12))
    plt.title('Undistorted Image', fontsize=30)
    plt.imshow(undst_t5)
if save_images:
    # cv2.imwrite expects BGR; BGR2RGB performs the same swap either way.
    cv2.imwrite(out_img_folder + 'undistorted.jpg', BGR2RGB(undst_t5))
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(20, 100)):
    """Binary threshold on the absolute Sobel gradient along one axis.

    Parameters
    ----------
    img : np.ndarray
        RGB image.
    orient : str
        'x' or 'y' — the gradient direction.
    sobel_kernel : int
        Sobel kernel size (odd).
    thresh : (int, int)
        Inclusive min/max on the 0-255 rescaled absolute gradient.

    Returns
    -------
    np.ndarray
        uint8 binary image: 1 where the threshold is met, 0 elsewhere.

    Raises
    ------
    ValueError
        For an invalid `orient` (the original crashed later with a
        NameError on the unbound `abs_sobel`).
    """
    # Convert to grayscale (inputs come from mpimg, hence RGB order).
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Apply the x or y gradient and take the absolute value.
    if orient == 'x':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    elif orient == 'y':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    else:
        raise ValueError("orient must be 'x' or 'y', got %r" % (orient,))
    abs_sobel = np.absolute(sobel)
    max_val = np.max(abs_sobel)
    if max_val == 0:
        # Perfectly flat image: no gradients anywhere; avoid the
        # divide-by-zero the original would hit when rescaling.
        return np.zeros_like(gray, dtype=np.uint8)
    # Rescale back to 8 bit integer
    scaled_sobel = np.uint8(255 * abs_sobel / max_val)
    # Inclusive (>=, <=) thresholds on the rescaled gradient.
    binary_output = np.zeros_like(scaled_sobel)
    binary_output[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
    return binary_output
# Return the magnitude of the gradient
# for a given sobel kernel size and threshold values
def mag_thresh(img, sobel_kernel=3, mag_thresh=(30, 100)):
    """Binary threshold on the gradient magnitude sqrt(gx^2 + gy^2).

    Parameters
    ----------
    img : np.ndarray
        RGB image.
    sobel_kernel : int
        Sobel kernel size (odd).
    mag_thresh : (int, int)
        Inclusive min/max on the 0-255 rescaled magnitude. (The
        parameter shadows the function name; kept for backward
        compatibility with keyword callers.)

    Returns
    -------
    np.ndarray
        uint8 binary image: 1 where the threshold is met, 0 elsewhere.
    """
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Take both Sobel x and y gradients
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Calculate the gradient magnitude
    gradmag = np.sqrt(sobelx**2 + sobely**2)
    max_mag = np.max(gradmag)
    if max_mag == 0:
        # Flat image: avoid the divide-by-zero the original hit while
        # rescaling (scale_factor would be 0).
        return np.zeros_like(gray, dtype=np.uint8)
    # Rescale to 8 bit
    scale_factor = max_mag / 255
    gradmag = (gradmag / scale_factor).astype(np.uint8)
    # Create a binary image of ones where threshold is met, zeros otherwise
    binary_output = np.zeros_like(gradmag)
    binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
    return binary_output
# Threshold an image on gradient direction for a given range and kernel
def dir_thresh(img, sobel_kernel=3, thresh=(0.7, 1.3)):
    """Binary threshold on the absolute gradient direction.

    Computes arctan(|dy| / |dx|) per pixel (radians, first quadrant)
    and returns a float image that is 1 where the direction lies inside
    the inclusive `thresh` interval and 0 elsewhere.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # x and y gradients with the requested kernel size.
    grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Direction of the gradient, folded into [0, pi/2] by the absolute
    # values.
    direction = np.arctan2(np.absolute(grad_y), np.absolute(grad_x))
    lo, hi = thresh
    in_range = (direction >= lo) & (direction <= hi)
    # Same float dtype as the direction array, 1.0 inside the band.
    return in_range.astype(direction.dtype)
def color_threshold(img, sthr=(100, 255), vthr=(50,255), draw_image=False):
    """AND-combine an HLS S-channel threshold with an HSV V-channel one.

    Parameters
    ----------
    img : np.ndarray
        RGB image.
    sthr : (int, int)
        Inclusive range for the HLS saturation channel.
    vthr : (int, int)
        Inclusive range for the HSV value (brightness) channel.
    draw_image : bool
        If True, plot the original, both channel binaries, and the
        combined result.

    Returns
    -------
    np.ndarray
        Float binary image: 1 where both thresholds are met.
    """
    # Convert to HLS space and threshold the S channel.
    # np.float was removed in NumPy 1.24 (deprecated since 1.20); the
    # explicit float64 dtype is the equivalent, future-proof spelling.
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)
    S = hls[:,:,2]
    sbin = np.zeros_like(S)
    sbin[(S >= sthr[0]) & (S <= sthr[1])] = 1
    # Convert to HSV space and threshold the V channel.
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    V = hsv[:,:,2]
    vbin = np.zeros_like(V)
    vbin[(V >= vthr[0]) & (V <= vthr[1])] = 1
    # A pixel survives only when both channel thresholds agree.
    output = np.zeros_like(S)
    output[(sbin == 1) & (vbin == 1)] = 1
    if draw_image:
        f, ax = plt.subplots(2, 2, figsize=(12, 9))
        f.tight_layout()
        ax[0,0].imshow(img)
        ax[0,0].set_title('Original', fontsize=20)
        ax[0,1].imshow(output, cmap='gray')
        ax[0,1].set_title('Combined', fontsize=20)
        ax[1,0].imshow(sbin, cmap='gray')
        ax[1,0].set_title('Binary S', fontsize=20)
        ax[1,1].imshow(vbin, cmap='gray')
        ax[1,1].set_title('Binary V', fontsize=20)
        plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
    return output
def color_transform_bxsv(img, draw_image=False, draw_steps=False):
    """
    Binary lane mask from combined gradient and color thresholds.

    In order to eliminate lines that are not yellow or white:
    - Calculate the X gradient binary
    - Calculate a V (brightness) threshold binary
    - Calculate in-range color masks for yellow and white
    - Combine X and V with AND
    - Combine (X AND V), yellow, white with OR

    Returns a uint8 mask whose values are 0 or 255 (cv2.inRange
    outputs 0/255 and the gradient binary is scaled to match).
    `draw_steps` is currently unused.
    """
    xthr = (12, 255)   # x-gradient threshold (0-255 rescaled)
    vthr = (180, 255)  # HSV V-channel brightness threshold
    HSV = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    V = HSV[:,:,2]
    # Yellow lane pixels selected in HSV.
    yellow = cv2.inRange(HSV, (16, 70, 100), (36, 255, 255))
    # White lane pixels: three complementary definitions (HSV high-V,
    # HLS high-L/low-S, and plain bright RGB).
    sensitivity_1 = 48
    white = cv2.inRange(HSV, (0,0,255-sensitivity_1), (255,20,255))
    sensitivity_2 = 53
    white_2 = cv2.inRange(HLS, (0,255-sensitivity_2, 0), (255,255,sensitivity_2))
    white_3 = cv2.inRange(img, (200, 200, 200), (255,255,255))
    # Union of the color masks (used only for the debug plot below).
    bit_layer = yellow | white | white_2 | white_3
    gradx = abs_sobel_thresh(img, orient='x', thresh=xthr)
    vbin = np.zeros_like(V)
    vbin[(V >= vthr[0]) & (V <= vthr[1])] = 255
    # Keep x-gradient responses only where the image is bright enough
    # (gradx is 0/1, so *255 aligns it with the 0/255 inRange masks).
    gradxv = (gradx * 255) & vbin
    result = gradxv | yellow | white | white_2 | white_3
    if draw_image:
        plt.figure(figsize=(18, 12))
        plt.title('img', fontsize=30)
        plt.imshow(img)
        plt.figure(figsize=(18, 12))
        plt.title('result', fontsize=30)
        plt.imshow(result, cmap='gray')
        plt.figure(figsize=(18, 12))
        plt.title('bit_layer', fontsize=30)
        plt.imshow(bit_layer, cmap='gray')
        plt.figure(figsize=(18, 12))
        plt.title('gradxv', fontsize=30)
        plt.imshow(gradxv, cmap='gray')
    return result
if draw_inner_images:
    # Threshold the undistorted test image and show the binary result.
    ctr_t5 = color_transform_bxsv(undst_t5)
    plt.figure(figsize=(18,12))
    plt.title('Thresholded Binary Image', fontsize=30)
    plt.imshow(ctr_t5, cmap='gray')
    if save_images:
        # color_transform_bxsv already returns a 0/255 uint8 mask
        # (cv2.inRange outputs 0/255). Multiplying by 255 again wrapped
        # in uint8 (255*255 -> 1) and saved a nearly black image, so
        # write the mask directly.
        cv2.imwrite(out_img_folder + 'thresholded.jpg', ctr_t5)
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
def perspective(img, draw_image=False):
    """Warp a road image to a top-down (bird's-eye) view.

    A trapezoid in the source image — defined as percentages of the
    image dimensions — is mapped to a rectangle occupying the central
    half of the output.

    Parameters
    ----------
    img : np.ndarray
        Input image (grayscale or color).
    draw_image : bool
        If True, plot source and warped images with the src/dst
        polygons overlaid (and save the figure when `save_images`).

    Returns
    -------
    (warped, M, Minv)
        Warped image, forward transform matrix, inverse matrix.
    """
    # Work on defining the perspective transformation area.
    is0 = img.shape[0]  # height
    is1 = img.shape[1]  # width
    img_size = (is1, is0)
    bot_width = .45    # percent of bottom trapezoid width
    mid_width = .08    # percent of top (middle) trapezoid width
    height_pct = .65   # percent for trapezoid height
    bottom_trim = .95  # percent from top to bottom to avoid car hood
    midxb = 0.50       # horizontal center of the bottom edge
    midxt = 0.50       # horizontal center of the top edge
    tlx = is1 * (midxt - mid_width / 2)  # top left x
    trx = is1 * (midxt + mid_width / 2)  # top right x
    brx = is1 * (midxb + bot_width / 2)  # bottom right x
    blx = is1 * (midxb - bot_width / 2)  # bottom left x
    src = np.float32([[tlx, is0 * height_pct],
                      [trx, is0 * height_pct],
                      [brx, is0 * bottom_trim],
                      [blx, is0 * bottom_trim]])
    # Destination rectangle: central half of the warped image.
    offset = is1 * .25
    dst = np.float32([[offset, 0],
                      [is1 - offset, 0],
                      [is1 - offset, is0],
                      [offset, is0]])
    if draw_image:
        # Closed polygon paths for overlaying src/dst on the plots.
        codes = [Path.MOVETO,
                 Path.LINETO,
                 Path.LINETO,
                 Path.LINETO,
                 Path.LINETO,
                 ]
        path = Path(np.concatenate((src, [src[0,:]]), axis=0), codes)
        patch_src = patches.PathPatch(path, lw=3, fill=False, color='blue')
        path = Path(np.concatenate((dst, [dst[0,:]]), axis=0), codes)
        patch_dst = patches.PathPatch(path, lw=3, fill=False, color='blue')
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
    if draw_image:
        # Fix: the original also called a bare `plt.figure()` here,
        # creating an extra empty figure that was never used.
        f, (ax1, ax2) = plt.subplots(1,2,figsize=(24,9))
        ax1.set_title('Source image')
        ax1.imshow(img, cmap='gray')
        ax1.add_patch(patch_src)
        ax2.set_title('Warped image')
        ax2.imshow(warped, cmap='gray')
        ax2.add_patch(patch_dst)
    if save_images and draw_image:
        plt.savefig(out_img_folder + 'warped_straight_lines.jpg')
    return warped, M, Minv
if draw_inner_images:
    # Demonstrate the transform on a straight-lane image: warped lane
    # lines should come out parallel, which is easy to verify visually.
    image_s1 = mpimg.imread(test_folder + 'straight_lines1.jpg')
    warped_s1, M_s1, Minv_s1 = perspective(image_s1, True)
if draw_inner_images:
    # Warp the undistorted test image, then threshold the top-down view.
    warped_t5, M_t5, Minv_t5 = perspective(undst_t5, False)
    plt.figure(figsize=(18,12))
    plt.title('warped_t5', fontsize=30)
    plt.imshow(warped_t5, cmap='gray')
    binary_t5 = color_transform_bxsv(warped_t5)
    plt.figure(figsize=(18,12))
    plt.title('Warped Thresholded Binary Image', fontsize=30)
    plt.imshow(binary_t5, cmap='gray')
class tracker():
    """Stateful lane-line tracker.

    Holds the sliding-window search parameters plus the previous
    frame's polynomial fits and lane polygon, so `process_image` can
    seed each new video frame from the last good detection.
    (Lower-case class name kept for compatibility with existing code.)
    """
    # When starting a new instance please be sure
    # to specify all unassigned variables
    def __init__(self, width, height, margin, ym=1, xm=1):
        """Store search parameters and reset all tracking state.

        width/height: centroid search window size, in pixels.
        margin: horizontal slide distance between levels, in pixels.
        ym/xm: meters-per-pixel factors (vertical / horizontal).
        """
        # list that stores all the past (left, right) center set values
        # used for smoothing the output
        self.recent_centers = []
        # the window pixel width of the center values,
        # used to count pixels inside center windows to determine curve values
        self.window_width = width
        # the window pixel height of the center values,
        # used to count pixels inside center windows to determine
        # curve values; breaks the image into vertical levels
        self.window_height = height
        # The pixel distance in both directions to slide
        self.margin = margin
        self.ym_per_pix = ym # meters per pixel in vertical axis
        self.xm_per_pix = xm # meters per pixel in horizontal axis
        # Previous A, B, C values for left lane Polynomial (pixel)
        self.left_fit = None
        # Previous A, B, C values for right lane Polynomial (pixel)
        self.right_fit = None
        # Previous Inner Polygon
        self.inner_lane = None
        # Previous image has a successful fit or not
        self.fit = False
        # Count of consecutive images that lanes are not detected
        # Reset to zero on a successful fit
        self.undetected = 0
    # the main tracking function for finding and storing lane segment positions
    def find_window_centroids(self, warped):
        """Locate lane-line window centroids level by level.

        warped: top-down binary image (nonzero where lane pixels are).

        Returns (left_centroids, right_centroids): lists of (x, y)
        window-center tuples ordered bottom-to-top. A level is skipped
        when its convolution response peak counts 5 or fewer pixels,
        and the search band doubles (capped) until the line is found
        again.
        """
        window_width = self.window_width
        window_height = self.window_height
        margin_left = self.margin
        margin_right = self.margin
        # Store the (left, right) window centroid positions per level
        left_centroids = []
        right_centroids = []
        # Create our window template that we will use for convolutions
        window = np.ones(window_width)
        ws0 = warped.shape[0]
        ws1 = warped.shape[1]
        # First find the two starting positions for the left and right lane by
        # using np.sum to get the vertical image slice and then np.convolve
        # the vertical image slice with the window template
        # Sum half bottom of image to get slice
        l_warped = warped[int(ws0/2):,:int(ws1/2)]
        l_sum = np.sum(l_warped, axis=0)
        l_center = np.argmax(np.convolve(window,l_sum))-window_width/2
        r_warped = warped[int(ws0/2):,int(ws1/2):]
        r_sum = np.sum(r_warped, axis=0)
        r_center = np.argmax(np.convolve(window,r_sum))-window_width/2+int(ws1/2)
        # Window-center y coordinate for each level, bottom row upward.
        res_yvals = np.arange(ws0 - (window_height / 2), 0, -window_height)
        if l_sum.sum() > 0:
            left_centroids.append((l_center, res_yvals[0]))
        else:
            # Empty bottom-left quadrant: fall back to a default start
            # a quarter of the way across the image.
            l_center = int(ws1/4)
        if r_sum.sum() > 0:
            right_centroids.append((r_center, res_yvals[0]))
        else:
            r_center = int(3 * ws1/4)
        # Add what we found for the first layer
        offset = window_width / 2
        # Go through each layer looking for max pixel locations
        for level in range(1, (int)(ws0/window_height)):
            # convolve the window into the vertical slice of the image
            warped_layer = warped[int(ws0-(level+1)*window_height):int(ws0-level*window_height),:]
            image_layer = np.sum(warped_layer, axis=0)
            conv_signal = np.convolve(window, image_layer)
            # Find the best left centroid by using past left center as a reference
            # Use window_width/2 as offset because convolution signal reference is
            # at right side of window, not center of window
            offset = window_width / 2
            l_min_index = int(max(l_center+offset-margin_left,0))
            l_max_index = int(min(l_center+offset+margin_left,ws1))
            l_conv_signal = conv_signal[l_min_index:l_max_index]
            l_conv_signal0 = (l_conv_signal.shape[0] == 0)
            l_count = 0
            r_count = 0
            if not l_conv_signal0:
                l_argmax = np.argmax(l_conv_signal)
                l_count = l_conv_signal[l_argmax]
            # If the number of the detected points is greater than 5
            # Add centroid, else skip for that window
            if not l_conv_signal0 and l_count > 5:
                l_min_offset = l_min_index-offset
                l_center = l_argmax + l_min_offset
                left_centroids.append((l_center, res_yvals[level]))
                margin_left = self.margin
            else:
                # Missed this level: widen the search band (capped at an
                # eighth of the image width) for the next level.
                margin_left = int(min(margin_left * 2, ws1/8))
            # Find the best centroid by using past right center as a reference
            r_min_index = int(max(r_center+offset-margin_right,0))
            r_max_index = int(min(r_center+offset+margin_right,ws1))
            r_conv_signal = conv_signal[r_min_index:r_max_index]
            r_conv_signal0 = (r_conv_signal.shape[0] == 0)
            if not r_conv_signal0:
                r_argmax = np.argmax(r_conv_signal)
                r_count = r_conv_signal[r_argmax]
            # If the number of the detected points is greater than 5
            # Add centroid, else skip for that window
            if not r_conv_signal0 and r_count > 5:
                r_min_offset = r_min_index-offset
                r_center = r_argmax + r_min_offset
                right_centroids.append((r_center, res_yvals[level]))
                margin_right = self.margin
            else:
                margin_right = int(min(margin_right * 2, ws1/8))
        return left_centroids, right_centroids
def window_mask(width, height, img_ref, centerx, centery):
    """Build a binary mask marking one centroid search window.

    The rectangle spans `height` rows starting at row `centery`, and
    columns `centerx - width` .. `centerx + width`, clipped to the
    image bounds.

    Parameters
    ----------
    width, height : int
        Window half-width and window height, in pixels.
    img_ref : np.ndarray
        Reference image; only its shape and dtype are used.
    centerx, centery : float
        Window position; the tracker passes float centroid coordinates.

    Returns
    -------
    np.ndarray
        Array like `img_ref` with 1 inside the window, 0 elsewhere.
    """
    output = np.zeros_like(img_ref)
    is0 = img_ref.shape[0]
    is1 = img_ref.shape[1]
    # Cast to int before slicing: the tracker's centroid y values
    # (res_yvals) are floats, and NumPy raises on float slice indices.
    y_start = int(centery)
    y_end = y_start + int(height)
    x_start = max(0, int(centerx - width))
    x_end = min(int(centerx + width), is1)
    output[y_start:y_end, x_start:x_end] = 1
    return output
def process_image(img, img_name="", draw_image=False):
    """Full lane-detection pipeline for one RGB frame.

    Undistorts the frame, warps it to a top-down view, thresholds it
    into a binary lane mask, finds/tracks the lane polynomials through
    the global `curve_centers` tracker, and draws the lane overlay,
    curvature radii, and vehicle offset back onto the original frame.

    Relies on module-level globals: mtx, dist, curve_centers,
    save_images, out_img_folder, and the helpers perspective,
    color_transform_bxsv, window_mask, BGR2RGB.

    Parameters
    ----------
    img : np.ndarray
        RGB input frame.
    img_name : str
        Frame name (currently unused by the body).
    draw_image : bool
        If True, plot/save intermediate debug images.

    Returns
    -------
    np.ndarray
        The annotated RGB frame.
    """
    is0 = img.shape[0]
    is1 = img.shape[1]
    img_size = (is1, is0)
    # Undistort, warp to a bird's-eye view, then threshold to binary.
    undst = cv2.undistort(img, mtx, dist, None, mtx)
    perspect, M, Minv = perspective(undst)
    warped = color_transform_bxsv(perspect)
    window_width = curve_centers.window_width
    window_height = curve_centers.window_height
    #margin = curve_centers.margin
    margin = 100  # search margin (pixels) around the seed polynomial
    ym_per_pix = curve_centers.ym_per_pix
    xm_per_pix = curve_centers.xm_per_pix
    ws0 = warped.shape[0]
    ws1 = warped.shape[1]
    yvals = range(0, ws0)
    left_fit = None
    right_fit = None
    if (curve_centers.fit):
        # Previous frame had a good fit: reuse it as the search seed.
        left_fit = curve_centers.left_fit
        right_fit = curve_centers.right_fit
    else:
        # Cold start / lost track: full sliding-window centroid search.
        left_centroids, right_centroids = curve_centers.find_window_centroids(warped)
        if draw_image:
            l_points = np.zeros_like(warped)
            r_points = np.zeros_like(warped)
        # Points used to find the left and right lanes
        rightx = []
        leftx = []
        righty = []
        lefty = []
        # Go through each level and draw the windows
        for level in range(0, len(left_centroids)):
            leftx_window = left_centroids[level][0]
            leftx.append(leftx_window)
            lefty_window = left_centroids[level][1]
            lefty.append(lefty_window)
            if draw_image:
                l_mask = window_mask(window_width, window_height,
                                     warped, leftx_window, lefty_window)
                l_points[(l_points == 1) | (l_mask == 1)] = 1
        for level in range(0, len(right_centroids)):
            rightx_window = right_centroids[level][0]
            rightx.append(rightx_window)
            righty_window = right_centroids[level][1]
            righty.append(righty_window)
            if draw_image:
                r_mask = window_mask(window_width, window_height,
                                     warped, rightx_window, righty_window)
                r_points[(r_points == 1) | (r_mask == 1)] = 1
        # Seed polynomials from the centroid positions (if any found).
        if len(leftx) > 0 and len(lefty) > 0:
            left_fit = np.polyfit(lefty, leftx, 2)
        if len(rightx) > 0 and len(righty) > 0:
            right_fit = np.polyfit(righty, rightx, 2)
    binary_warped = warped
    # New warped binary image from the next frame of video
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    if left_fit is not None:
        # Select nonzero pixels within +-margin of the seed polynomial.
        left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] + margin)))
        # Again, extract left and right line pixel positions
        leftx = nonzerox[left_lane_inds]
        lefty = nonzeroy[left_lane_inds]
    if right_fit is not None:
        right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] + margin)))
        # Again, extract left and right line pixel positions
        rightx = nonzerox[right_lane_inds]
        righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial to each
    if left_fit is not None and leftx.shape[0] > 0 and lefty.shape[0] > 0:
        left_fit = np.polyfit(lefty, leftx, 2)
        curve_centers.fit = True
    else:
        # No usable left pixels: fall back to the tracker's last fit.
        curve_centers.fit = False
        left_fit = curve_centers.left_fit
    if right_fit is not None and rightx.shape[0] > 0 and righty.shape[0] > 0:
        right_fit = np.polyfit(righty, rightx, 2)
    else:
        curve_centers.fit = False
        right_fit = curve_centers.right_fit
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
    # Defaults in case of no fit this frame.
    left_lane = None
    right_lane = None
    inner_lane = None
    curverad_l = -1
    curverad_r = -1
    side_pos = 'undefined'
    center_diff = 0
    # y at which the curvature radii are evaluated (mid-height).
    y_eval = yvals[-1] / 2
    # POLYGON
    ret = 9  # matchShapes score; initialized above the 0.1 threshold
    save_fit = True
    if left_fit is not None and right_fit is not None:
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
        # Polygon between the two fitted lane edges.
        inner_lane = np.array(list(zip(np.concatenate((left_fitx + window_width/2, right_fitx[::-1]- window_width/2),axis=0),np.concatenate((yvals,yvals[::-1]),axis=0))),np.int32)
        # Sanity Check
        # POLYGON: compare against the previous lane polygon; a large
        # shape difference means this frame's fit is suspect.
        if curve_centers.inner_lane is not None:
            ret = cv2.matchShapes(inner_lane, curve_centers.inner_lane, 1, 0.0)
            if (ret >= 0.1):
                save_fit = False
    else:
        save_fit = False
    if save_fit or curve_centers.undetected > 10:
        # Accept the new fit (or force-accept after >10 bad frames so
        # the tracker can recover from a stale polygon).
        curve_centers.inner_lane = inner_lane
        curve_centers.left_fit = left_fit
        curve_centers.right_fit = right_fit
        curve_centers.fit = True
        curve_centers.undetected = 0
    elif curve_centers.inner_lane is not None:
        # Reject this frame: keep drawing with the previous fit.
        inner_lane = curve_centers.inner_lane
        left_fit = curve_centers.left_fit
        right_fit = curve_centers.right_fit
        curve_centers.fit = False
        curve_centers.undetected += 1
    # Plot the lanes
    # If a successful fit exists
    road = np.zeros_like(img)
    road_bkg = np.zeros_like(img)
    if left_fit is not None:
        # Generate x and y values for plotting
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        left_lane = np.array(list(zip(np.concatenate((left_fitx - window_width/2, left_fitx[::-1 ] + window_width/2),axis =0), np.concatenate((yvals,yvals[::-1]),axis=0))),np.int32)
        # Calculate the radius values in meters
        resl_ym = np.array(ploty, np.float32)*ym_per_pix
        resl_xm = np.array(left_fitx, np.float32)*xm_per_pix
        curve_fit_cr_l = np.polyfit(resl_ym,resl_xm,2)
        curverad_l = ((1 + (2*curve_fit_cr_l[0]*y_eval*ym_per_pix + curve_fit_cr_l[1])**2)**1.5) / np.absolute(2*curve_fit_cr_l[0])
        # Plot the left lane
        cv2.fillPoly(road, [left_lane], color=[255,0,0])
        cv2.fillPoly(road_bkg, [left_lane], color=[255,255,255])
    if right_fit is not None:
        # Generate x and y values for plotting
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
        right_lane = np.array(list(zip(np.concatenate((right_fitx - window_width/2, right_fitx[::-1] + window_width/2),axis=0),np.concatenate((yvals,yvals[::-1]),axis=0))),np.int32)
        # Calculate the radius values in meters
        resr_ym = np.array(ploty,np.float32)*ym_per_pix
        resr_xm = np.array(right_fitx, np.float32)*xm_per_pix
        curve_fit_cr_r = np.polyfit(resr_ym,resr_xm,2)
        curverad_r = ((1 + (2*curve_fit_cr_r[0]*y_eval*ym_per_pix + curve_fit_cr_r[1])**2)**1.5) / np.absolute(2*curve_fit_cr_r[0])
        # Plot the right lane
        cv2.fillPoly(road, [right_lane], color=[0,0,255])
        cv2.fillPoly(road_bkg, [right_lane], color=[255,255,255])
    if inner_lane is not None:
        cv2.fillPoly(road, [inner_lane], color=[0,255,0])
        # Calculate the offset of the car on the road from the lane
        # midpoint at the bottom of the image.
        camera_center = (left_fitx[-1] + right_fitx[-1]) / 2
        center_diff = (camera_center - ws1 / 2) * xm_per_pix
        side_pos = 'left'
        if center_diff <= 0:
            side_pos = 'right'
    # Reverse perspective transform the lanes
    road_warped_bkg = cv2.warpPerspective(road_bkg, Minv, img_size, flags=cv2.INTER_LINEAR)
    road_warped = cv2.warpPerspective(road, Minv, img_size, flags=cv2.INTER_LINEAR)
    # Combine lanes with the original image: subtract the white lane
    # background first so the colored overlay stays vivid.
    base = cv2.addWeighted(img, 1.0, road_warped_bkg, -1.0, 0.0)
    result = cv2.addWeighted(base, 1.0, road_warped, 0.5, 0.0)
    # Draw the text showing curvature, offset, and speed
    cv2.putText(result, 'Radius of Curvature Left: ' +str(round(curverad_l,3))+'(m) Right: ' +str(round(curverad_r,3))+'(m)',(50,50),cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255),2)
    cv2.putText(result,'Vehicle is '+str(abs(round(center_diff,3)))+'m '+side_pos+' of center',(50,100),cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255),2)
    if draw_image:
        # Plot warped binary image
        # Plot color fit lines, lanes are colored on the warped image
        plt.figure(figsize=(18,12))
        plt.title('warped')
        plt.imshow(warped, cmap='gray')
        if save_images:
            # NOTE(review): `warped` comes from color_transform_bxsv,
            # which returns 0/255 values, so this uint8 multiply wraps
            # (255*255 -> 1) — confirm whether a 0/1 mask was intended.
            cv2.imwrite(out_img_folder + 'warpedbinary.jpg', warped * 255)
        warpage = np.array(cv2.merge((warped, warped, warped)), np.uint8)
        warpage *= 255
        if left_lane is not None:
            warpage[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
        if right_lane is not None:
            warpage[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
        plt.figure(figsize=(18,12))
        plt.title('Warpage Lanes')
        plt.imshow(warpage)
        if left_fit is not None:
            plt.plot(left_fitx, ploty, color='yellow')
        if right_fit is not None:
            plt.plot(right_fitx, ploty, color='yellow')
        plt.xlim(0, 1280)
        plt.ylim(720, 0)
        if save_images:
            plt.savefig(out_img_folder + 'color_fit_lines.jpg')
        if save_images:
            cv2.imwrite(out_img_folder + 'example_output.jpg', BGR2RGB(result))
    return result
if draw_inner_images:
    # Run the full pipeline over a small list of test images with a
    # fresh tracker, plotting each annotated result.
    window_width = 25
    window_height = 80
    curve_centers = tracker(width=window_width, height=window_height,
                            margin=50, ym=30 / 720, xm=3.7 / 700)
    # Make a list of test images
    images = []
    #images += glob.glob(test_folder + '/straight_lines*.jpg')
    #images += glob.glob(test_folder + '/test*.jpg')
    #images.append(test_folder + 'straight_lines1.jpg')
    #images.append(test_folder + 'test1.jpg')
    images.append(test_folder + 'test5.jpg')
    # Skip the debug plots when the batch is large.
    if len(images) > 5:
        draw_image = False
    else:
        draw_image = True
    for idx, fname in enumerate(images):
        """
        curve_centers = tracker(width=window_width, height=window_height,
        margin=50, ym=30 / 720, xm=3.7 / 700)
        """
        img_paths = fname.split('/')
        imgpath = fname
        fidx = img_paths[-1][2:-4]  # NOTE: currently unused
        img = mpimg.imread(imgpath)
        result = process_image(img, img_name=img_paths[-1], draw_image=draw_image)
        plt.figure(figsize=(18, 12))
        plt.title(img_paths[-1] + 'result', fontsize=30)
        plt.imshow(result)
        # Safety cap on how many results are plotted.
        if idx >= 10:
            break
from moviepy.editor import VideoFileClip
from IPython.display import HTML
# Process the project video: use a fresh tracker so state from the
# test-image run above does not leak into the video.
idx = 0
curve_centers = tracker(width=25, height=80,
                        margin=50, ym=30 / 720, xm=3.7 / 700)
output_project_video = out_img_folder + 'output_tracked.mp4'
input_project_video = repo_folder + 'project_video.mp4'
clip1 = VideoFileClip(input_project_video)
video_clip = clip1.fl_image(process_image) # COLOR IMAGES
video_clip.write_videofile(output_project_video, audio=False)
# Inline player for the processed video (notebook display).
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(output_project_video))
# Process the challenge video with a re-initialized tracker so the
# project-video fits do not carry over.
idx = 0
curve_centers = tracker(width=25, height=80,
                        margin=50, ym=30 / 720, xm=3.7 / 700)
output_challange_video = out_img_folder + 'challenge_tracked.mp4'
input_challange_video = repo_folder + 'challenge_video.mp4'
clip1 = VideoFileClip(input_challange_video)
video_clip = clip1.fl_image(process_image) # COLOR IMAGES
video_clip.write_videofile(output_challange_video, audio=False)
# Inline player for the processed video (notebook display).
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(output_challange_video))